Here is a Python snippet that queries the Minelead API.
It reads domain names from an input file, one per line.
It writes the discovered email addresses to an output file, one per line.
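For example, the input file might look like this (placeholder domains, not part of the original script):

example.com
example.org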
import json
import requests
from concurrent.futures import ThreadPoolExecutor
# File paths
input_file_path = 'domains.txt'         # replace with your input file path
output_file_path = 'emails_output.txt'  # replace with your output file path
key = ''  # set this to your Minelead API key
# Function to query the Minelead API for a single domain
def process_row(row):
    domain = row.strip()
    api_url = 'https://api.minelead.io/v1/search/?domain=' + domain + '&key=' + key + '&max-emails=3'
    response = requests.get(api_url)
    try:
        return json.loads(response.content.strip())
    except json.JSONDecodeError:
        print(f"Error decoding JSON for domain: {domain}")
        return None
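# Note: the fields accessed below assume the API response is shaped roughly like
# {"domain": "example.com", "emails": [{"email": "info@example.com"}, ...]};
# this structure is inferred from the code, not from the Minelead documentation.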
# Function to extract emails from the response and write them to the output file
def extract_and_write_emails(data, output_file):
    if 'emails' in data:
        emails = [email['email'] for email in data['emails']]
        domain = data['domain']
        for email in emails:
            output_file.write(f"{domain}, {email}\n")
# Open input and output files
with open(input_file_path, 'r') as input_file, open(output_file_path, 'a') as output_file:
    # Create a thread pool executor
    with ThreadPoolExecutor(max_workers=3) as executor:
        # Process each non-empty row in a separate thread
        futures = [executor.submit(process_row, row) for row in input_file if row.strip()]
        # Process the results
        for future in futures:
            data = future.result()
            if data:
                extract_and_write_emails(data, output_file)
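Set key to your Minelead API key before running. Each line written to emails_output.txt pairs a domain with one address, e.g. example.com, info@example.com (placeholder values).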